home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Languguage OS 2
/
Languguage OS II Version 10-94 (Knowledge Media)(1994).ISO
/
language
/
awe
/
awe-full.lha
/
Awe2
/
DoNotUseThisSrc
/
MultiCpuMux.cc
< prev
next >
Wrap
C/C++ Source or Header
|
1990-08-08
|
14KB
|
585 lines
// This may look like C code, but it is really -*- C++ -*-
//
// Copyright (C) 1988 University of Illinois, Urbana, Illinois
//
// written by Dirk Grunwald (grunwald@cs.uiuc.edu)
//
#ifdef __GNUG__
# pragma implementation
#endif
#include "MultiCpuMux.h"
#include "CpuMultiplexorP.h"
#include "SpinLock.h"
#include "SpinBarrier.h"
#include "SpinFetchAndOp.h"
#include "Thread.h"
#include "HardwareContextP.h"
#include "ThreadContainer.h"
#include "ReserveByException.h"
#include "Pragma.h"
#include <math.h>
//
// Things left to do:
//
// + Make the ThreadHeap really use the Gnu PairingHeap structure.
// Doug Lea added an iterator class & other enhancements.
//
// + Capture signals, transfer them to an Exception class. Can
// use this to implement time-slices & the like, as well as....
//
// + Put in *addCpu* and *removeCpu* calls to CpuMultiplexor.
// This would allow run-time addition/removal of CPUS, so
// you can tailor your program to the system.
// This is tricky. Should probably do it when you
// advance the clock, but it'll be tricky to get all
// the CPUs to agree on the barrier height for the
// rendezvous. Also might complicate the *distinct
// pools of threads per cpu*.
//
// Guards the CpuMultiplexors count (number of active cpus).
static SpinLock CpuMultiplexorsLock;
// Global count of runnable threads across all per-cpu queues; bumped in
// add()/addToAnother(), decremented in remove().
static SpinFetchAndOp GlobalCurrentEventsCounter(0);
// "Giving up" rendezvous state used by stirItAround(): a count of idle
// cpus plus a generation number, both guarded by GivingUpLock.
static SpinLock GivingUpLock;
static int GivingUpCounter = 0;
static int GivingUpGeneration = 0;
//
// A currentEvents pile for each processor.  The count is only correct
// if you've reserved the corresponding spin lock -- unlocked reads of
// it are treated as a guess (see remove()).
//
static SpinLock CurrentEventsLock[MaxCpuMultiplexors];
static int CurrentEventsCounter[MaxCpuMultiplexors];
static ThreadContainer *CurrentEvents[MaxCpuMultiplexors];
//
// This cannot be private, or we won't see all the action
//
// Construct the multiplexor.  The base-initializer ": (debug)" is the
// old GNU C++ anonymous-base syntax: it forwards `debug` to the single
// (unnamed) base class -- presumably CpuMultiplexor, per the includes.
// This is pre-ISO C++ and only compiles with the era's g++.
MultiCpuMux::MultiCpuMux(int debug) : (debug)
{
    pNameTemplate = "MultiCpuMux";
    // iYam (this cpu's id) is re-stamped into the name again later by
    // allocateLocalEventStructures() once the real id is known.
    sprintf(nameSpace, "[%s-%d] ", pNameTemplate, iYam);
    CpuMuxDebugFlag = debug;
}
// Nothing to tear down here; the real per-cpu cleanup happens in
// deallocateEventStructures(), called from coolItDown().
MultiCpuMux::~MultiCpuMux()
{
}
//
// Add a single CPU to a set of current CPUs. There is an advantage of
// having all child processes be spawned by CPU #0; all child signals
// will be caught by the single parent.
//
// This entry is called by a Thread.
//
void MultiCpuMux::enrollCpu()
{
    //
    // Move the calling thread to cpu #0 first: all child processes are
    // spawned by cpu 0, so the single parent catches every child signal
    // (see the comment above).  There's a distinct possibility that the
    // thread gets stolen from cpu 0 again, since remove() only honors
    // cpuAffinity > 0 when stealing -- hence the retry loop.
    //
    while (iYam != 0) {
        currentThread -> cpuAffinity = 0;
        relocateException.cpu(0);
        raise( &relocateException );
    }
    //
    // (An unfinished note in the original trailed off here:
    // "If we're only using a single ...")
    //
    //
    // raise an exception to do the actual fork.  This means that
    // control flow for the new child process will be in the
    // stirItAround loop, as opposed to here.
    //
    enrollDismissCpuException.enroll();
    raise( &enrollDismissCpuException );
    // Enrollment done: free the thread to run on any cpu again.
    currentThread -> cpuAffinity = -1;
}
void
MultiCpuMux::dismissCpu()
{
    // Unimplemented -- see the "*removeCpu*" item in the to-do list at
    // the top of the file.  NOTE(review): under -DNDEBUG this assert
    // compiles away and the call becomes a silent no-op.
    assert(0);
}
void
MultiCpuMux::allocateLocalEventStructures(int newIYam, int outOf)
{
    //
    // Take on cpu identity `newIYam` (out of `outOf` cpus): rebuild the
    // name, claim this cpu's slot in the CurrentEvents tables, and
    // publish the new cpu count.
    //
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << "Allocate CpuMux structures for " << newIYam << "\n";
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
    iYam = newIYam;
    sprintf(nameSpace, "[%s-%d] ", pNameTemplate, iYam);
    pName = nameSpace;
    globalCurrentEventsCounter = &GlobalCurrentEventsCounter;
    // Per-cpu ready queue; the container comes from a (presumably
    // machine-specific) virtual/hook -- confirm in the base class.
    CurrentEventsCounter[iYam] = 0;
    CurrentEvents[iYam] = AllocateHardwareCurrentEventsStructure();
    myCurrentEvents = CurrentEvents[iYam];
    myCurrentEventsLock = &CurrentEventsLock[iYam];
    myCurrentEventsCounter = &CurrentEventsCounter[iYam];
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << "set CpuMultiplexors to " << outOf << "\n";
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
    // Publish the cpu count; if every cpu is already idle relative to
    // the new count, advance the generation so spinners in
    // stirItAround() notice.  Lock order: CpuMultiplexorsLock before
    // GivingUpLock (deallocateEventStructures uses the same order).
    CpuMultiplexorsLock.reserve();
    CpuMultiplexors = outOf;
    GivingUpLock.reserve();
    if (GivingUpCounter >= CpuMultiplexors) {
        GivingUpGeneration++;
        GivingUpCounter = 0;
    }
    GivingUpLock.release();
    CpuMultiplexorsLock.release();
}
void
MultiCpuMux::allocateEventStructures(int newIYam, int outOf)
{
    // Public entry point; all of the work is in the local helper.
    allocateLocalEventStructures(newIYam, outOf);
}
void
MultiCpuMux::deallocateEventStructures()
{
    //
    // Tear down this cpu's event structures on the way out (called from
    // coolItDown() by child cpus), first migrating any leftover runnable
    // threads to cpu 0's queue.  NOTE(review): if this ever ran with
    // iYam == 0, the CurrentEventsLock[0].reserve() below would deadlock
    // against myCurrentEventsLock, which we already hold.
    //
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << "Deallocate CpuMux structures for " << iYam << "\n";
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
    myCurrentEventsLock -> reserve();
    //
    // Move remaining events to another queue.  We're not adding new events,
    // just moving them around, so we don't increase GlobalCurrentEventsCounter
    //
    while ( CurrentEventsCounter[iYam] > 0 ) {
        CurrentEventsLock[0].reserve();
        assert(CurrentEvents[0] != 0);
        while( ! myCurrentEvents -> isEmpty() ) {
            CurrentEvents[0] -> add( myCurrentEvents -> remove() );
            CurrentEventsCounter[0]++;
            CurrentEventsCounter[iYam]--;
        }
        CurrentEventsLock[0].release();
    }
    // One fewer active cpu; if everyone still running is already idle,
    // advance the generation so spinners in stirItAround() wake up.
    CpuMultiplexorsLock.reserve();
    CpuMultiplexors--;
    GivingUpLock.reserve();
    if (GivingUpCounter >= CpuMultiplexors) {
        GivingUpGeneration++;
        GivingUpCounter = 0;
    }
    GivingUpLock.release();
    CpuMultiplexorsLock.release();
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << "set CpuMultiplexors to " << CpuMultiplexors;
        cerr << " and trigger GivingUp\n";
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
    // Release this cpu's queue storage and clear its published slots.
    delete myCurrentEvents;
    myCurrentEvents = 0;
    CurrentEvents[iYam] = 0;
    CurrentEventsCounter[iYam] = 0;
    myCurrentEventsLock -> release();
}
void
MultiCpuMux::fireItUp(int cpus, unsigned shared)
{
    //
    // Top-level startup: clamp the cpu count, set up shared memory
    // (needed only when we actually fork), spawn the cpus, run the
    // dispatcher, then shut everything down.  Shared memory must be
    // initialized *before* warmThePot() forks the children.
    //
    assert(cpus > 0);
    if ( cpus > MaxCpuMultiplexors ) {
        cpus = MaxCpuMultiplexors;
    }
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << "Allocate " << shared << " bytes of shared memory\n";
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
    if ( cpus > 1 ) {
        extern void SharedMemoryInit( unsigned );
        SharedMemoryInit( shared );
    }
    warmThePot(cpus);
    stirItAround();
    coolItDown();
}
void
MultiCpuMux::warmThePot(int cpus)
{
    //
    // Fork one process per additional cpu.  The original process keeps
    // id 0; each child takes an id in 1..(cpus-1) via
    // allocateEventStructures().
    //
    assert(cpus > 0);
    if ( cpus > MaxCpuMultiplexors ) {
        cpus = MaxCpuMultiplexors;
    }
    CpuMultiplexors = cpus;
    enabled = 1;
    //
    // Spawn the children, giving each a unique id from 1..(cpus-1) in
    // increasing order; the original process keeps id 0.
    //
    iYam = 0;
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << "Allocate " << CpuMultiplexors << " cpus\n";
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
    // Only the parent (iYam still 0) keeps forking; a child sets its own
    // iYam inside allocateEventStructures() and then leaves the loop.
    for (int whoAmI = 1; whoAmI < CpuMultiplexors; whoAmI++) {
        if (iYam == 0) {
            int pid = fork();
            if (pid == 0) { // child
                // NOTE(review): presumably gives the parent time to
                // finish forking the whole family first -- confirm.
                sleep(10);
                allocateEventStructures(whoAmI, CpuMultiplexors);
                break;
            }
        }
    }
    // Not the loop-local `pid` above: this one is presumably a member
    // inherited from the base class, recording this process's real pid.
    pid = getpid();
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << "I am now id " << iYam << " and pid " << pid <<" \n";
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
}
void
MultiCpuMux::coolItDown()
{
    //
    // Shut down this cpu.  Children (iYam > 0) tear down their event
    // structures and exit; the parent (iYam == 0) reaps every child so
    // its caller can then exit safely.
    //
    if (iYam > 0) {
#ifndef NDEBUG
        if (CpuMuxDebugFlag) {
            CpuCerrLock.reserve();
            cerr << name() << "exit\n";
            CpuCerrLock.release();
        }
#endif
        deallocateEventStructures();
        // _exit(), not exit(): leave without running atexit handlers or
        // flushing stdio buffers shared with the parent.
        _exit(0);
    }
    else {
        //
        // reap the dead children.  This way we know they are all dead.
        // The caller can then safely exit.  (Children decrement the
        // shared CpuMultiplexors count in deallocateEventStructures.)
        //
        while (CpuMultiplexors > 1) {
            int pid = wait(0);
            if (pid == -1) {
                perror("wait");
                break;
            }
        }
        //
        // In case of break in above loop
        //
        CpuMultiplexors = 1;
    }
}
//
// Make `who` runnable on this cpu: enqueue it on our own pile and bump
// both the local and the global runnable counts.
//
void
MultiCpuMux::add(Thread *who)
{
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << " add ";
        if (who == 0 || who -> name() == 0) {
            // No printable name -- show the pointer instead.
            cerr << hex(long(who)) << "\n";
        } else {
            cerr << who -> name() << "\n";
        }
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
    SpinLock *queueLock = myCurrentEventsLock;
    queueLock -> reserve();
    addUnlocked( who );
    *myCurrentEventsCounter += 1;
    queueLock -> release();
    // Advertise the new work globally (outside the queue lock).
    GlobalCurrentEventsCounter.add(1);
}
void
MultiCpuMux::addToAnother(int cpu, Thread *who)
{
assert( cpu >= 0 && cpu < CpuMultiplexors );
CurrentEventsLock[cpu].reserve();
CurrentEvents[cpu] -> add( who );
CurrentEventsCounter[cpu]++;
CurrentEventsLock[cpu].release();
GlobalCurrentEventsCounter.add(1);
}
Thread *
MultiCpuMux::remove()
{
    //
    // Return a runnable thread, or 0 if there is nothing to do.  We try
    // our own current-events queue first, then go begging round-robin
    // through every other cpu's queue (simple work stealing).
    //
    Thread *threadToExecute = 0;
    //
    // System stopped?
    //
    if (*terminated) {
#ifndef NDEBUG
        if (CpuMuxDebugFlag) {
            CpuCerrLock.reserve();
            cerr << name() << " Stopping muxing \n" ;
            CpuCerrLock.release();
        }
#endif /* NDEBUG */
        return(0);
    }
    //
    // I got something to do?
    //
    myCurrentEventsLock -> reserve();
    if ( *myCurrentEventsCounter > 0 ) {
        threadToExecute = myCurrentEvents -> remove();
        (*myCurrentEventsCounter) --;
    }
    myCurrentEventsLock -> release();
    //
    // Maybe someone else has something to do?  The global counter is an
    // unlocked hint that work exists somewhere.
    //
    if ( threadToExecute == 0 && GlobalCurrentEventsCounter.value() > 0 ) {
        int ask = iYam;
        do {
            ask++; // start with next person,
            if ( ask >= CpuMultiplexors ) { // wrap around for fairness
                ask = 0;
            }
#ifndef NDEBUG
            if (CpuMuxDebugFlag) {
                CpuCerrLock.reserve();
                cerr << name() << "Ask " << ask << " about events \n";
                CpuCerrLock.release();
            }
#endif /* NDEBUG */
            //
            // Note that we're *not* locking before looking at
            // CurrentEventsCounter -- the unlocked read is only a
            // *guess*; we lock and re-check before actually removing.
            // Admittedly, a stale guess can make us skip real work, so
            // maybe the second time around we should always reserve and
            // then look.
            //
            if ( CurrentEventsCounter[ask] > 0) {
                CurrentEventsLock[ask].reserve();
                if ( CurrentEventsCounter[ask] > 0) {
#ifndef NDEBUG
                    if (CpuMuxDebugFlag) {
                        CpuCerrLock.reserve();
                        cerr << name();
                        cerr << "Found one in " << ask << "\n";
                        CpuCerrLock.release();
                    }
#endif /* NDEBUG */
                    threadToExecute = CurrentEvents[ask] -> remove();
                    //
                    // Don't steal a thread that is pinned to some other
                    // cpu.  (Only cpuAffinity > 0 is honored here, so a
                    // thread pinned to cpu 0 can still be stolen -- which
                    // is why enrollCpu() loops until it lands on cpu 0.)
                    //
                    if (threadToExecute -> cpuAffinity > 0 &&
                        threadToExecute -> cpuAffinity != iYam) {
#ifndef NDEBUG
                        if (CpuMuxDebugFlag) {
                            CpuCerrLock.reserve();
                            cerr << name();
                            cerr << "but returned it because of afinity\n";
                            CpuCerrLock.release();
                        }
#endif /* NDEBUG */
                        // Put it back; its count was never decremented.
                        CurrentEvents[ask] -> add(threadToExecute);
                        threadToExecute = 0;
                    }
                    else {
                        CurrentEventsCounter[ask]--;
                    }
                }
                CurrentEventsLock[ask].release();
            }
        } while (ask != iYam && threadToExecute == 0);
    }
#ifndef NDEBUG
    if (CpuMuxDebugFlag) {
        CpuCerrLock.reserve();
        cerr << name() << "find ";
        if (threadToExecute == 0) {
            cerr << "nothing\n";
        } else {
            cerr << threadToExecute -> name() << "\n";
        }
        CpuCerrLock.release();
    }
#endif /* NDEBUG */
    // Keep the global runnable count in step with whichever queue we
    // actually took the thread from.
    if ( threadToExecute != 0 ) {
        GlobalCurrentEventsCounter.add(-1);
    }
    return( threadToExecute );
}
//
// This is the job dispatcher.
//
void
MultiCpuMux::stirItAround()
{
    //
    // Per-cpu dispatch loop: repeatedly pull a runnable thread via
    // remove(), switch to it, and when the thread raises an exception
    // back into the multiplexor, handle it and go around again.
    // Returns when the system is terminated or when every cpu agrees
    // there is no work left (the GivingUp rendezvous below).
    //
    // Fix vs. original: the two "#endif DEBUG_MALLOC" directives carried
    // bare tokens after #endif, which is ill-formed ISO C/C++ (and
    // inconsistent with the file's own "#endif /* NDEBUG */" style);
    // the token is now a comment.  No logic changes.
    //
    currentThread = 0;
    if (!enabled) {
        cerr << "Need to initialize CpuMultiplexor before using it\n";
    }
    while( ! *terminated ) {
        while ( currentThread == 0 ) {
            currentThread = remove();
            if (currentThread != 0) {
                break;
            }
            //
            // Nothing to run: register as an idle cpu.  If we are the
            // last one to go idle and no work exists globally, bump the
            // generation (waking the other spinners) and give up.
            //
            GivingUpLock.reserve();
            GivingUpCounter++;
            assert( GivingUpCounter > 0 && GivingUpCounter <= CpuMultiplexors);
            if ( GivingUpCounter == CpuMultiplexors
                && GlobalCurrentEventsCounter.value() == 0)
            {
                GivingUpGeneration ++;
                GivingUpCounter = 0;
                GivingUpLock.release();
#ifndef NDEBUG
                if (CpuMuxDebugFlag) {
                    CpuCerrLock.reserve();
                    cerr << name() << "give up\n";
                    CpuCerrLock.release();
                }
#endif /* NDEBUG */
                return;
            }
            else {
                //
                // Spin until (a) the generation changes (everyone gave
                // up), (b) work appears globally, or (c) termination.
                // VOLATILE is presumably a project macro for `volatile`
                // (see Pragma.h) so *genp is re-read each iteration --
                // the spin deliberately reads it without the lock.
                //
                VOLATILE int generation = GivingUpGeneration;
                VOLATILE int *genp = &GivingUpGeneration;
                GivingUpLock.release();
                while( generation == *genp
                    && GlobalCurrentEventsCounter.value() == 0
                    && !*terminated );
                GivingUpLock.reserve();
                if ( *genp != generation || *terminated ) {
#ifndef NDEBUG
                    if (CpuMuxDebugFlag) {
                        CpuCerrLock.reserve();
                        cerr << name() << " giving up\n";
                        CpuCerrLock.release();
                    }
#endif /* NDEBUG */
                    GivingUpLock.release();
                    return;
                }
                else {
                    // Work showed up: stop being idle and retry remove().
                    GivingUpCounter--;
                    assert(GivingUpCounter >= 0);
                    GivingUpLock.release();
#ifndef NDEBUG
                    if (CpuMuxDebugFlag) {
                        CpuCerrLock.reserve();
                        cerr << name() << " check for something";
                        cerr << " i have " << *myCurrentEventsCounter;
                        cerr << " out of " ;
                        cerr << GlobalCurrentEventsCounter.value() << "\n";
                        CpuCerrLock.release();
                    }
#endif /* NDEBUG */
                }
            }
        }
#ifndef NDEBUG
        if (CpuMuxDebugFlag || currentThread -> debug()) {
            CpuCerrLock.reserve();
            cerr << name() << " switch to ";
            cerr << currentThread->name() << "\n";
            CpuCerrLock.release();
        }
#endif /* NDEBUG */
#ifdef DEBUG_MALLOC
        assert( malloc_verify() );
#endif /* DEBUG_MALLOC */
        // Run the thread until it raises an exception back at us.
        systemContext.switchContext(&(currentThread -> pContext));
#ifdef DEBUG_MALLOC
        assert( malloc_verify() );
#endif /* DEBUG_MALLOC */
        // The thread came back via raise(); dispatch its exception.
        assert(raisedBy != 0);
        raisedBy -> handleException();
        raisedBy = 0;
    }
}